Author:    kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Date:      Sat, 11 Feb 2006 12:06:49 +0000 (13:06 +0100)
Committer: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>

Rework hvm_wait_io() -- now tries to be a polite user of
event channels by re-setting the selector and master
pending flags when it exits. Should make for better
behaviour when there are others using the event channels.

This needs some testing to be sure it doesn't break
anything or trigger latent bugs.

Signed-off-by: Keir Fraser <keir@xensource.com>
xen/arch/x86/hvm/io.c
xen/arch/x86/hvm/svm/vmcb.c
xen/arch/x86/hvm/vmx/io.c
xen/include/asm-x86/hvm/support.h

index 8e725fcecc9191b2bbfdfb33816dc916dc60aa68..9f32792e8fa4a4e61fa10468b726c95bcea45db1 100644 (file)
@@ -690,62 +690,41 @@ void hvm_io_assist(struct vcpu *v)
     }
 }
 
-int hvm_clear_pending_io_event(struct vcpu *v)
+/*
+ * On exit from hvm_wait_io, we're guaranteed not to be waiting on
+ * I/O response from the device model.
+ */
+void hvm_wait_io(void)
 {
-    struct domain *d = v->domain;
+    struct vcpu *v = current;
+    struct domain *d = v->domain;    
     int port = iopacket_port(d);
 
-    /* evtchn_pending_sel bit is shared by other event channels. */
-    if (!d->shared_info->evtchn_pending[port/BITS_PER_LONG])
+    for ( ; ; )
+    {
+        /* Clear master flag, selector flag, event flag each in turn. */
+        v->vcpu_info->evtchn_upcall_pending = 0;
+        smp_mb__before_clear_bit();
         clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
+        smp_mb__after_clear_bit();
+        if ( test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]) )
+            hvm_io_assist(v);
 
-    /* Note: HVM domains may need upcalls as well. */
-    if (!v->vcpu_info->evtchn_pending_sel)
-        clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
+        /* Need to wait for I/O responses? */
+        if ( !test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
+            break;
 
-    /* Clear the pending bit for port. */
-    return test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]);
-}
+        do_sched_op(SCHEDOP_block, 0);
+    }
 
-/*
- * Because we've cleared the pending events first, we need to guarantee that
- * all events to be handled by xen for HVM domains are taken care of here.
- *
- * interrupts are guaranteed to be checked before resuming guest.
- * HVM upcalls have been already arranged for if necessary.
- */
-void hvm_check_events(struct vcpu *v)
-{
     /*
-     * Clear the event *before* checking for work. This should
-     * avoid the set-and-check races
+     * Re-set the selector and master flags in case any other notifications
+     * are pending.
      */
-    if (hvm_clear_pending_io_event(current))
-        hvm_io_assist(v);
-}
-
-/*
- * On exit from hvm_wait_io, we're guaranteed to have a I/O response
- * from the device model.
- */
-void hvm_wait_io(void)
-{
-    int port = iopacket_port(current->domain);
-
-    do {
-        if (!test_bit(port, &current->domain->shared_info->evtchn_pending[0]))
-           do_sched_op(SCHEDOP_block, 0);
-
-        hvm_check_events(current);
-        if (!test_bit(ARCH_HVM_IO_WAIT, &current->arch.hvm_vcpu.ioflags))
-            break;
-        /*
-        * Events other than IOPACKET_PORT might have woken us up.
-        * In that case, safely go back to sleep.
-        */
-        clear_bit(port/BITS_PER_LONG, &current->vcpu_info->evtchn_pending_sel);
-        clear_bit(0, &current->vcpu_info->evtchn_upcall_pending);
-    } while(1);
+    if ( d->shared_info->evtchn_pending[port/BITS_PER_LONG] )
+        set_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
+    if ( v->vcpu_info->evtchn_pending_sel )
+        v->vcpu_info->evtchn_upcall_pending = 1;
 }
 
 /*
index f55b88dc4541ab7e15ee1d67dfb8db2a8f779438..996c4deaa15b1bb5098c786fcc2ee2f593763ef9 100644 (file)
@@ -489,13 +489,8 @@ void svm_do_resume(struct vcpu *v)
 {
     struct hvm_virpit *vpit = &v->domain->arch.hvm_domain.vpit;
     
-    if (event_pending(v)) 
-    {
-        hvm_check_events(v);
-
-        if (test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags))
-            hvm_wait_io();
-    }
+    if ( event_pending(v) )
+        hvm_wait_io();
 
     /* pick up the elapsed PIT ticks and re-enable pit_timer */
     if ( vpit->first_injected ) {
index 0d9449bb022211b1c709df946eccd86fda42ea72..ac2a6b60c54601dc0e5a4e8fdd8497985ede6625 100644 (file)
@@ -177,17 +177,13 @@ void vmx_do_resume(struct vcpu *v)
 
     vmx_stts();
 
-    if (event_pending(v)) {
-        hvm_check_events(v);
+    if ( event_pending(v) )
+        hvm_wait_io();
 
-        if (test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags))
-            hvm_wait_io();
-    }
     /* pick up the elapsed PIT ticks and re-enable pit_timer */
-    if ( vpit->first_injected ) {
+    if ( vpit->first_injected )
         pickup_deactive_ticks(vpit);
-    }
-    vmx_set_tsc_shift(v,vpit);
+    vmx_set_tsc_shift(v, vpit);
 
     /* We can't resume the guest if we're waiting on I/O */
     ASSERT(!test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags));
index 193de6f886782f956942ef86b5cd1caae00b0af9..07dac5f52ee3b2b373de950a239c1ae2089ee5fc 100644 (file)
@@ -141,7 +141,6 @@ extern int hvm_copy(void *buf, unsigned long vaddr, int size, int dir);
 extern void hvm_setup_platform(struct domain* d);
 extern int hvm_mmio_intercept(ioreq_t *p);
 extern int hvm_io_intercept(ioreq_t *p, int type);
-extern void hvm_check_events(struct vcpu *v);
 extern void hvm_hooks_assist(struct vcpu *v);
 extern void hvm_print_line(struct vcpu *v, const char c);
 extern void hlt_timer_fn(void *data);